vcpu_ptc_e: fix flush order.
vcpu_ptc_g: fix typo (only the local vcpu's v-tlb was flushed).
itlb_pte/dtlb_pte removed.
vcpu_itr_* and vcpu_itc_no_srlz call vcpu_set_tr_entry coherently.
in_tpa parameter of vcpu_translate removed.
handle_lazy_cover is now static and its unused 'isr' parameter removed.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
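
For reference, a minimal sketch of the vcpu_translate signature change listed above. The typedefs here are stand-ins for the real Xen ia64 types (VCPU, UINT64, BOOLEAN, IA64FAULT) and exist only so the sketch is self-contained:

typedef unsigned long UINT64;
typedef int BOOLEAN;
typedef int IA64FAULT;
typedef struct vcpu VCPU;

/* old: callers passed in_tpa so dom0 could be handed the shadow
 *      dtlb_pte instead of the matched TR entry's pte */
/* IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
 *                          BOOLEAN in_tpa,
 *                          UINT64 *pteval, UINT64 *itir, UINT64 *iha); */

/* new: one behavior for every caller; *pteval always comes from the
 *      TR entry itself, so the shadow itlb_pte/dtlb_pte fields go away */
IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
                         UINT64 *pteval, UINT64 *itir, UINT64 *iha);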
DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
- DEFINE(IA64_VCPU_DTLB_PTE_OFFSET, offsetof (struct vcpu, arch.dtlb_pte));
- DEFINE(IA64_VCPU_ITLB_PTE_OFFSET, offsetof (struct vcpu, arch.itlb_pte));
DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
#undef FAST_ITC //XXX CONFIG_XEN_IA64_DOM0_VP
// TODO: fast_itc doesn't support dom0 vp yet.
#else
-//#define FAST_ITC // working but default off for now
+//#define FAST_ITC // to be reviewed
#endif
#define FAST_BREAK
#ifndef CONFIG_XEN_IA64_DOM0_VP
GLOBAL_ENTRY(fast_tlb_miss_reflect)
#ifndef FAST_TLB_MISS_REFLECT // see beginning of file
br.spnt.few page_fault ;;
-#endif
+#else
mov r31=pr
mov r30=cr.ipsr
mov r29=cr.iip
mov r29=cr.iip
mov r30=cr.ipsr
br.sptk.many fast_reflect;;
+#endif
END(fast_tlb_miss_reflect)
// ensure that, if giving up, registers at entry to fast_hyperprivop are unchanged
ENTRY(hyper_itc_d)
#ifndef FAST_ITC
br.sptk.many dispatch_break_fault ;;
-#endif
+#else
// ensure itir.ps >= xen's pagesize
adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r23=[r23];;
movl r30=recover_and_dispatch_break_fault ;;
mov r16=r8;;
// fall through
+#endif
+#if defined(FAST_ITC) || defined(FAST_TLB_MISS_REFLECT)
// fast_insert(PSCB(ifa),r24=ps,r16=pte)
// r16 == pte
rfi
;;
END(fast_insert)
-
+#endif
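
The guard restructuring in the hunks above is worth spelling out: previously the fast-path body sat after an unconditional early branch and was still assembled when the feature was off, and fast_insert was assembled even with no caller. Turning the closing #endif into an #else/#endif pair makes the two arms mutually exclusive; a schematic of the pattern, with comments standing in for the assembly:

#ifndef FAST_ITC
	/* feature off: only the punt to the slow C handler is assembled */
	/* br.sptk.many dispatch_break_fault ;; */
#else
	/* feature on: only the fast-path body is assembled */
#endif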
}
unsigned long lazy_cover_count = 0;
-int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
+static int
+handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
if (!PSCB(v,interrupt_collection_enabled)) {
PSCB(v,ifs) = regs->cr_ifs;
unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
IA64FAULT fault;
- if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
+ if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
{
}
again:
- fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
+ fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
u64 logps;
pteval = translate_domain_pte(pteval, address, itir, &logps);
while(vector);
return;
}
- if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
+ if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
PSCB(current,ifa) = ifa;
PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
reflect_interruption(isr,regs,vector);
return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
}
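
The helper used above splits the match into a present-bit check and a rid/range check. A self-contained sketch of the pair, with field names assumed for illustration:

#include <stdint.h>

typedef struct {
	uint64_t vadr;                  /* virtual base of the entry */
	uint64_t rid;                   /* region id it was inserted under */
	uint64_t ps;                    /* log2 of the page size */
	struct { uint64_t p : 1; } pte; /* present bit */
} TR_ENTRY;

/* match rid and address range only; the caller decides how the
 * present bit is factored in */
static inline int
vcpu_match_tr_entry_no_p(const TR_ENTRY *trp, uint64_t ifa, uint64_t rid)
{
	return trp->rid == rid
	    && ifa >= trp->vadr
	    && ifa < trp->vadr + (1UL << trp->ps);
}

/* full match: the entry must also be present */
static inline int
vcpu_match_tr_entry(const TR_ENTRY *trp, uint64_t ifa, uint64_t rid)
{
	return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
}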
-// in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP
-IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
+IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
{
unsigned long region = address >> 61;
unsigned long pta, rid, rr;
pte = trp->pte;
if (/* is_data && */ pte.p
&& vcpu_match_tr_entry_no_p(trp,address,rid)) {
-#ifndef CONFIG_XEN_IA64_DOM0_VP
- if (vcpu->domain==dom0 && !in_tpa)
- *pteval = pte.val;
- else
-#endif
- *pteval = vcpu->arch.dtlb_pte;
+ *pteval = pte.val;
*itir = trp->itir;
dtlb_translate_count++;
return IA64_USE_TLB;
UINT64 pteval, itir, mask, iha;
IA64FAULT fault;
- fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
+ fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
{
mask = itir_mask(itir);
if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
return;
if (IorD & 0x1) {
- vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
- PSCBX(vcpu,itlb_pte) = mp_pte;
+ vcpu_set_tr_entry(&PSCBX(vcpu,itlb),mp_pte,ps<<2,vaddr);
}
if (IorD & 0x2) {
- vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
- PSCBX(vcpu,dtlb_pte) = mp_pte;
+ vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),mp_pte,ps<<2,vaddr);
}
}
// architected loop to purge the entire TLB, should use
// base = stride1 = stride2 = 0, count0 = count1 = 1
+ // just invalidate the "whole" tlb
+ vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
+ vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+
#ifdef VHPT_GLOBAL
vhpt_flush(); // FIXME: This is overdoing it
#endif
local_flush_tlb_all();
- // just invalidate the "whole" tlb
- vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
- vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+
return IA64_NO_FAULT;
}
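
This reordering is the "fix flush order" item from the log: the 1-entry v-TLBs are now purged before the VHPT and hardware TLB flushes, so there is no window in which the hardware state is clean while the software entries still advertise a stale translation. A sketch of the purge helper, reusing the TR_ENTRY sketch above and assuming the present bit is the validity marker:

static inline void
vcpu_purge_tr_entry(TR_ENTRY *trp)
{
	trp->pte.p = 0;   /* assumption: p == 0 invalidates the entry */
}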
/* Purge TC entries.
FIXME: clear only if match. */
- vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
- vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+ vcpu_purge_tr_entry(&PSCBX(v,dtlb));
+ vcpu_purge_tr_entry(&PSCBX(v,itlb));
#ifdef VHPT_GLOBAL
/* Invalidate VHPT entries. */
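
This is the vcpu_ptc_g typo from the log: the hunk sits inside a loop whose iteration variable is v, so the old PSCBX(vcpu, ...) spelling always named the calling vcpu and only its v-tlb was purged. A sketch of the intended shape, assuming Xen's for_each_vcpu iterator over the target domain d:

struct vcpu *v;

for_each_vcpu(d, v) {
	/* purge every vcpu's 1-entry v-TLBs, not just the caller's */
	vcpu_purge_tr_entry(&PSCBX(v, dtlb));
	vcpu_purge_tr_entry(&PSCBX(v, itlb));
}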
TR_ENTRY dtlb;
unsigned int itr_regions;
unsigned int dtr_regions;
- unsigned long itlb_pte;
- unsigned long dtlb_pte;
unsigned long irr[4];
unsigned long insvc[4];
unsigned long tc_regions;
extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
-extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address,
- BOOLEAN is_data, BOOLEAN in_tpa,
+extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
UINT64 *pteval, UINT64 *itir, UINT64 *iha);
extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);